home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
cpp_libs
/
awe2-0_1.lha
/
awe2-0.1
/
Src
/
RCS
/
CpuMultiplexor.cc,v
< prev
next >
Wrap
Text File
|
1989-03-22
|
20KB
|
949 lines
head 3.2;
branch ;
access ;
symbols ;
locks grunwald:3.2; strict;
comment @@;
3.2
date 89.02.20.15.33.56; author grunwald; state Exp;
branches ;
next 3.1;
3.1
date 88.12.20.13.48.34; author grunwald; state Exp;
branches ;
next 1.2;
1.2
date 88.11.02.13.39.56; author grunwald; state Exp;
branches ;
next 1.1;
1.1
date 88.10.30.13.03.05; author grunwald; state Exp;
branches ;
next ;
desc
@@
3.2
log
@Start using Gnu library heaps for schedulers
@
text
@// This may look like C code, but it is really -*- C++ -*-
//
// Copyright (C) 1988 University of Illinois, Urbana, Illinois
//
// written by Dirk Grunwald (grunwald@@cs.uiuc.edu)
//
#include "CpuMultiplexor.h"
#include "SpinLock.h"
#include "SpinFetchAndOp.h"
#include "Thread.h"
#include "ThreadContainer.h"
#include "ReserveByException.h"
#include <math.h>
//
// Things left to do:
//
// + Make the ThreadHeap really use the Gnu PairingHeap structure.
// Doug Lea added an iterator class & other enhancements.
//
// + Capture signals, transfer them to an Exception class. Can
// use this to implement time-slices & the like, as well as....
//
// + Put in *addCpu* and *removeCpu* calls to CpuMultiplexor.
// This would allow run-time addition/removal of CPUS, so
// you can tailor your program to system
// This is tricky. Should probably do it when you
// advance the clock, but it'll be tricky to get all
// the CPUs to agree on the barrier height for the
// rendezvous. Also might complicate the *distinct
// pools of threads per cpu*.
//
// Number of currently active multiplexors (one per forked process) and
// the multiplexor owned by the process we are executing in.
int CpuMultiplexors;
CpuMultiplexor *ThisCpu;
// Guards updates to CpuMultiplexors.
static SpinLock CpuMultiplexorsLock;
// Set non-zero by terminateAll(); every dispatch loop polls it.
static int StopCpuMux;
// Serializes debugging output on cerr across processors.
SpinLock CpuCerrLock;
// Global count of runnable threads summed over all per-CPU queues.
static SpinFetchAndOp GlobalCurrentEventsCounter(0);
// Rendezvous state for idle CPUs: GivingUpCounter counts CPUs that found
// no work; bumping GivingUpGeneration releases the spinning waiters.
static SpinLock GivingUpLock;
static int GivingUpCounter = 0;
static int GivingUpGeneration = 0;
//
// A currentEvents pile for each processor. The count is only correct
// if you've reserved the spin lock -- it's used as a guess.
//
static SpinLock CurrentEventsLock[MaxCpuMultiplexors];
static int CurrentEventsCounter[MaxCpuMultiplexors];
static ThreadContainer *CurrentEvents[MaxCpuMultiplexors];
//
// This can not be private, or we won't see all the action
//
int CpuMuxDebugFlag = 0;
// Construct the initial multiplexor. The constructing process is always
// CPU #0; forked children later renumber their copy of this object via
// allocateEventStructures().
// @param debug  initial value for the global CpuMuxDebugFlag.
CpuMultiplexor::CpuMultiplexor(int debug) : systemContext(0, 0)
{
pNameTemplate = "CpuMux";
currentThread = 0;
iYam = 0;  // CPU id of this process; the original process is 0
pid = 0;
CpuMuxDebugFlag = debug;
ThisCpu = this;
terminated = &StopCpuMux;
// NOTE(review): assumes nameSpace is large enough for the formatted
// name -- confirm against its declaration in CpuMultiplexor.h.
sprintf(nameSpace, "[%s-%d] ", pNameTemplate, iYam);
pName = nameSpace;
allocateLocalEventStructures(0,1);  // start life as CPU 0 of 1
}
// Nothing to tear down here; per-CPU event structures are released
// explicitly through deallocateEventStructures().
CpuMultiplexor::~CpuMultiplexor() { }
// Turn debugging chatter on or off for every multiplexor (the flag is
// a file-scope global shared by all of them).
void CpuMultiplexor::debug(int newdebug) {
    CpuMuxDebugFlag = newdebug;
}
// Report the current global debug setting.
int CpuMultiplexor::debug() {
    return CpuMuxDebugFlag;
}
// Ask every dispatch loop (stirItAround) to stop: each loop polls the
// shared StopCpuMux flag and exits once it is set.
void CpuMultiplexor::terminateAll() {
    StopCpuMux = 1;
}
//
// Add a single CPU to a set of current CPUs. There is an advantage of
// having all child processes be spawned by CPU #0; all child signals
// will be caught by the single parent.
//
// This entry is called by a Thread.
//
// Called by a running Thread to add one CPU to the pool. The calling
// thread first migrates itself to CPU #0 (children should all be forked
// by the single parent, CPU #0), then raises the enroll exception so
// the fork itself happens inside the dispatcher loop.
void CpuMultiplexor::enrollCpu()
{
//
// move thread to master process. There's a distinct possibility
// that this guy will get stolen from Cpu #0 if everyone else is
// looking for work.
//
while (iYam != 0) {
currentThread -> cpuAffinity = 0;   // pin this thread to CPU 0 ...
rescheduleException.cpu(0);         // ... and reschedule it there
raise( &rescheduleException );
// loop again: we may have been stolen before landing on CPU 0
}
//
// If we're only using a single
//
//
// raise an exception to do the actual fork. This means that
// control flow for the new child process will be in the
// stirItAround loop, as opposed to here.
//
enrollDismissCpuException.enroll();
raise( &enrollDismissCpuException );
currentThread -> cpuAffinity = -1;  // drop the affinity pin again
}
// Removing a CPU at run time is not implemented (see the to-do list at
// the top of the file); abort if anyone tries.
void CpuMultiplexor::dismissCpu() {
    assert(0);
}
// Bind this multiplexor to CPU slot *newIYam* out of *outOf* CPUs:
// renames the object, points the my* members at that slot's queue,
// lock, and counter, and publishes the new CPU count. Also resets the
// giving-up rendezvous if the shrunken/new count makes it satisfied.
// @param newIYam  CPU id this process should assume (0-based).
// @param outOf    total number of active CPUs after this call.
void
CpuMultiplexor::allocateLocalEventStructures(int newIYam, int outOf)
{
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "Allocate CpuMux structures for " << newIYam << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
iYam = newIYam;
sprintf(nameSpace, "[%s-%d] ", pNameTemplate, iYam);
pName = nameSpace;
globalCurrentEventsCounter = &GlobalCurrentEventsCounter;
CurrentEventsCounter[iYam] = 0;
CurrentEvents[iYam] = AllocateHardwareCurrentEventsStructure();
myCurrentEvents = CurrentEvents[iYam];
myCurrentEventsLock = &CurrentEventsLock[iYam];
myCurrentEventsCounter = &CurrentEventsCounter[iYam];
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "set CpuMultiplexors to " << outOf << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
// Lock order: CpuMultiplexorsLock before GivingUpLock (matches
// deallocateEventStructures).
CpuMultiplexorsLock.reserve();
CpuMultiplexors = outOf;
GivingUpLock.reserve();
// If every remaining CPU has already given up, advance the generation
// so the spinning waiters in stirItAround are released.
if (GivingUpCounter >= CpuMultiplexors) {
GivingUpGeneration++;
GivingUpCounter = 0;
}
GivingUpLock.release();
CpuMultiplexorsLock.release();
}
// Public entry point for (re)binding a CPU slot; simply forwards to the
// local-structure allocator.
void CpuMultiplexor::allocateEventStructures(int newIYam, int outOf) {
    allocateLocalEventStructures(newIYam, outOf);
}
// Retire this CPU: drain any leftover threads from its queue onto
// CPU #0's queue, decrement the CPU count, kick the giving-up
// rendezvous if the count drop satisfies it, and free the queue.
// NOTE(review): must not run on CPU 0 itself -- myCurrentEventsLock
// would alias CurrentEventsLock[0] and the inner reserve() would
// self-deadlock. The only caller, coolItDown(), guards with iYam > 0.
void
CpuMultiplexor::deallocateEventStructures()
{
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "Deallocate CpuMux structures for " << iYam << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
myCurrentEventsLock -> reserve();
//
// Move remaining events to another queue. We're not adding new events,
// just moving them around, so we don't increase GlobalCurrentEventsCounter
//
while ( CurrentEventsCounter[iYam] > 0 ) {
CurrentEventsLock[0].reserve();
assert(CurrentEvents[0] != 0);
while( ! myCurrentEvents -> isEmpty() ) {
CurrentEvents[0] -> add( myCurrentEvents -> remove() );
CurrentEventsCounter[0]++;
CurrentEventsCounter[iYam]--;
}
CurrentEventsLock[0].release();
}
// Lock order: CpuMultiplexorsLock before GivingUpLock (matches
// allocateLocalEventStructures).
CpuMultiplexorsLock.reserve();
CpuMultiplexors--;
GivingUpLock.reserve();
// With one fewer CPU the current set of give-ups may now be complete;
// if so, advance the generation to release the waiters.
if (GivingUpCounter >= CpuMultiplexors) {
GivingUpGeneration++;
GivingUpCounter = 0;
}
GivingUpLock.release();
CpuMultiplexorsLock.release();
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "set CpuMultiplexors to " << CpuMultiplexors;
cerr << " and trigger GivingUp\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
delete myCurrentEvents;
myCurrentEvents = 0;
CurrentEvents[iYam] = 0;
CurrentEventsCounter[iYam] = 0;
myCurrentEventsLock -> release();
}
// Top-level driver: set up shared memory (if multiprocessing), fork the
// worker CPUs (warmThePot), run the dispatch loop (stirItAround), then
// shut down and reap the children (coolItDown).
// @param cpus    requested CPU count; clamped to MaxCpuMultiplexors.
// @param shared  bytes of shared memory to set up when cpus > 1.
void
CpuMultiplexor::fireItUp(int cpus, unsigned shared)
{
assert(cpus > 0);
if ( cpus > MaxCpuMultiplexors ) {
cpus = MaxCpuMultiplexors;
}
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "Allocate " << shared << " bytes of shared memory\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
// Shared memory is only needed when more than one process will run;
// it must exist before the forks in warmThePot.
if ( cpus > 1 ) {
extern void SharedMemoryInit( unsigned );
SharedMemoryInit( shared );
}
warmThePot(cpus);
stirItAround();
coolItDown();
}
// Fork the worker processes. Only the parent (iYam == 0) forks; each
// child immediately renumbers itself and leaves the loop.
// @param cpus  desired CPU count; clamped to MaxCpuMultiplexors.
void
CpuMultiplexor::warmThePot(int cpus)
{
assert(cpus > 0);
if ( cpus > MaxCpuMultiplexors ) {
cpus = MaxCpuMultiplexors;
}
CpuMultiplexors = cpus;
enabled = 1;
//
// Spawn the children, giving each a unique id. The child forked on
// iteration whoAmI renumbers itself to whoAmI via
// allocateEventStructures, so children get ids 1..(cpus-1) in fork
// order and the original process keeps id 0.
//
iYam = 0;
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "Allocate " << CpuMultiplexors << "cpus\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
for (int whoAmI = 1; whoAmI < CpuMultiplexors; whoAmI++) {
// Only the parent forks; a child sets iYam != 0 and breaks below.
if (iYam == 0) {
// NOTE(review): this local pid shadows the member pid, which is
// set for everyone from getpid() after the loop.
int pid = fork();
if (pid == 0) { // child
allocateEventStructures(whoAmI, CpuMultiplexors);
break;
}
}
}
pid = getpid();
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "I am now id " << iYam << " and pid " << pid <<" \n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
}
// Shut this CPU down. Children (iYam > 0) hand back their event queue
// and _exit; the parent reaps every child so the caller can safely
// exit afterwards.
void
CpuMultiplexor::coolItDown()
{
if (iYam > 0) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "exit\n";
CpuCerrLock.release();
}
#endif
deallocateEventStructures();
// _exit, not exit: skip stdio/atexit cleanup shared with the parent.
_exit(0);
}
else {
//
// reap the dead children. This way we know they are all dead.
// The caller can then safely exit.
//
// NOTE(review): loop termination relies on each exiting child
// decrementing CpuMultiplexors (in shared memory) via
// deallocateEventStructures -- confirm SharedMemoryInit covers it.
while (CpuMultiplexors > 1) {
int pid = wait(0);
if (pid == -1) {
perror("wait");
break;
}
}
//
// In case of break in above loop
//
CpuMultiplexors = 1;
}
}
// Make thread *who* runnable on this CPU's queue: enqueue under the
// local lock, then bump the global runnable count so idle CPUs notice.
// @param who  the thread to enqueue (debug path tolerates 0 / no name).
void
CpuMultiplexor::add(Thread *who)
{
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
if (who != 0 && who -> name() != 0) {
cerr << name() << " add " << who -> name() << "\n";
} else {
cerr << name() << " add " << hex(long(who)) << "\n";
}
CpuCerrLock.release();
}
#endif /* NDEBUG */
myCurrentEventsLock -> reserve();
addUnlocked( who );  // actual enqueue; must hold myCurrentEventsLock
(*myCurrentEventsCounter)++;
myCurrentEventsLock -> release();
// Bump the global count after releasing the local lock.
GlobalCurrentEventsCounter.add(1);
}
// Queue thread *who* directly on another CPU's event pile (used e.g.
// when migrating a thread to a specific CPU).
// @param cpu  target CPU id; must name an active multiplexor.
// @param who  the thread to enqueue there.
void
CpuMultiplexor::addToAnother(int cpu, Thread *who)
{
assert( cpu >= 0 && cpu < CpuMultiplexors );
CurrentEventsLock[cpu].reserve();
CurrentEvents[cpu] -> add( who );
CurrentEventsCounter[cpu]++;
CurrentEventsLock[cpu].release();
GlobalCurrentEventsCounter.add(1);
}
// Fetch the next runnable thread: first from our own queue, then by
// scanning the other CPUs' queues (work stealing). Returns 0 when the
// system is stopping or no work is found anywhere.
Thread *
CpuMultiplexor::remove()
{
//
// Check to see if there is a current event, either in our current
// events queue or someone else's current events queue. If there is
// nothing, return 0.
//
Thread *threadToExecute = 0;
//
// System stopped?
//
if (StopCpuMux) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << " Stopping muxing \n" ;
CpuCerrLock.release();
}
#endif /* NDEBUG */
return(0);
}
//
// I got something to do?
//
myCurrentEventsLock -> reserve();
if ( *myCurrentEventsCounter > 0 ) {
threadToExecute = myCurrentEvents -> remove();
(*myCurrentEventsCounter) --;
}
myCurrentEventsLock -> release();
//
// Maybe someone else has something to do?
//
// Scan the other CPUs round-robin, starting just past ourselves.
if ( threadToExecute == 0 && GlobalCurrentEventsCounter.value() > 0 ) {
int ask = iYam;
do {
ask++; // start with next person,
if ( ask >= CpuMultiplexors ) { // wrap around for fairness
ask = 0;
}
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "Ask " << ask << " about events \n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
//
// Note that we're *not* locking before looking
// at CurrentEventsCount -- we treat this as a *guess*
// before bothering to lock on it. Admittedly, this could
// cause a problem, so maybe the second time around,
// we should always reserve and then look.
//
if ( CurrentEventsCounter[ask] > 0) {
CurrentEventsLock[ask].reserve();
// Re-check under the lock: the unlocked peek above was a guess.
if ( CurrentEventsCounter[ask] > 0) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name();
cerr << "Found one in " << ask << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
threadToExecute = CurrentEvents[ask] -> remove();
//
// Check that this thread isn't trying to get to
// a specific CPU. (Affinity 0 is not honored here --
// a thread pinned to CPU 0 can still be stolen; see the
// comment in enrollCpu.)
//
if (threadToExecute -> cpuAffinity > 0 &&
threadToExecute -> cpuAffinity != iYam) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name();
cerr << "but returned it because of afinity\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
// Put it back for its preferred CPU; counter unchanged.
CurrentEvents[ask] -> add(threadToExecute);
threadToExecute = 0;
}
else {
CurrentEventsCounter[ask]--;
}
}
CurrentEventsLock[ask].release();
}
} while (ask != iYam && threadToExecute == 0);
}
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "find ";
if (threadToExecute == 0) {
cerr << "nothing\n";
} else {
cerr << threadToExecute -> name() << "\n";
}
CpuCerrLock.release();
}
#endif /* NDEBUG */
// Whichever queue it came from, it is no longer globally runnable.
if ( threadToExecute != 0 ) {
GlobalCurrentEventsCounter.add(-1);
}
return( threadToExecute );
}
//
// Exception handlers
//
// Raise an exception from the current thread: record it in raisedBy and
// switch to the dispatcher's context. stirItAround resumes after its
// own switchContext call and invokes raisedBy->handleException().
// @param by  the exception to hand to the dispatcher; must be non-zero.
void
CpuMultiplexor::raise(ExceptionClass *by)
{
assert(currentThread != 0 && enabled);
raisedBy = by;
currentThread -> pContext.switchContext( &systemContext );
}
//
// This is the job dispatcher.
//
// The job dispatcher, run by every CPU process. Repeatedly pick a
// thread via remove(), switch to it, and handle the exception it
// raises on the way back. When no work exists anywhere, rendezvous
// with the other CPUs through GivingUpCounter/GivingUpGeneration:
// the last CPU to give up advances the generation and everyone
// returns; otherwise spin until a new event or generation appears.
//
// Fix: '#endif DEBUG_MALLOC' is ill-formed -- tokens after #endif must
// be inside a comment -- so both occurrences now use /* ... */.
void
CpuMultiplexor::stirItAround()
{
currentThread = 0;
if (!enabled) {
cerr << "Need to initialize CpuMultiplexor before using it\n";
}
while( ! StopCpuMux ) {
// Inner loop: hunt for a thread to run, blocking (spinning) in the
// giving-up rendezvous when the whole system looks idle.
while ( currentThread == 0 ) {
currentThread = remove();
if (currentThread != 0) {
break;
}
GivingUpLock.reserve();
GivingUpCounter++;
assert( GivingUpCounter > 0 && GivingUpCounter <= CpuMultiplexors);
// If we are the last CPU to give up and nothing is runnable
// anywhere, advance the generation (releasing the others) and
// leave the dispatcher.
if ( GivingUpCounter == CpuMultiplexors
&& GlobalCurrentEventsCounter.value() == 0)
{
GivingUpGeneration ++;
GivingUpCounter = 0;
GivingUpLock.release();
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << "give up\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
return;
}
else {
// Snapshot the generation, then spin (unlocked) until either a
// new event appears, the generation changes, or we are stopped.
int generation = GivingUpGeneration;
GivingUpLock.release();
while( generation == GivingUpGeneration
&& GlobalCurrentEventsCounter.value() == 0
&& !StopCpuMux )
{
// makeVolatile defeats register-caching of *generation* so the
// loop re-reads shared memory each iteration (pre-volatile era).
extern makeVolatile(void *);
makeVolatile(&generation);
}
GivingUpLock.reserve();
if ( GivingUpGeneration != generation || StopCpuMux ) {
// Everyone gave up (or we were stopped) while we waited.
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << " giving up\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
GivingUpLock.release();
return;
}
else {
// A new event arrived: withdraw our give-up and look again.
GivingUpCounter--;
assert(GivingUpCounter >= 0);
GivingUpLock.release();
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << " check for something";
cerr << " i have " << *myCurrentEventsCounter;
cerr << " out of " ;
cerr << GlobalCurrentEventsCounter.value() << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
}
}
}
#ifndef NDEBUG
if (CpuMuxDebugFlag || currentThread -> debug()) {
CpuCerrLock.reserve();
cerr << name() << " switch to ";
cerr << currentThread->name() << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
#ifdef DEBUG_MALLOC
assert( malloc_verify() );
#endif /* DEBUG_MALLOC */
// Run the chosen thread; we resume here when it raises an exception.
systemContext.switchContext(&(currentThread -> pContext));
#ifdef DEBUG_MALLOC
assert( malloc_verify() );
#endif /* DEBUG_MALLOC */
// The thread came back via raise(); dispatch its exception. The
// handler decides what becomes of currentThread / raisedBy state.
assert(raisedBy != 0);
raisedBy -> handleException();
raisedBy = 0;
}
}
@
3.1
log
@Steady version
@
text
@d612 4
a615 1
d617 4
@
1.2
log
@Before using SpinEvents for global coordination
@
text
@d37 1
a37 1
static SpinLock CpuEnrollLock;
a42 3
//
// The count of all all current events -- this speeds up exiting
//
a43 1
d45 2
a46 2
static int GivingUpCounter;
static int GivingUpGeneration;
d64 1
a68 1
allocateEventStructures(0);
d71 4
d100 3
a102 1
// Add a single CPU to set of current CPUs
d104 2
d108 21
a128 1
assert(0);
d138 1
a138 1
CpuMultiplexor::allocateEventStructures(int newIYam)
d140 8
d149 1
a149 1
sprintf(nameSpace, "[CpuMultiplexor-%d] ", iYam);
d151 3
a153 1
d156 1
a156 1
d160 20
a179 1
globalCurrentEventsCounter = &GlobalCurrentEventsCounter;
d183 6
d191 8
d201 2
a202 1
// Move remaining events to another queue
d204 1
a204 1
while ( CurrentEventsCounter[iYam] == 0 ) {
d207 5
a211 3
CurrentEvents[0] -> add( myCurrentEvents -> remove() );
CurrentEventsCounter[0]++;
CurrentEventsCounter[iYam]--;
d214 22
a240 5
GivingUpLock.reserve();
GivingUpCounter = 0;
GivingUpGeneration = 1;
GivingUpLock.release();
d301 1
a301 1
allocateEventStructures(whoAmI);
d327 1
a340 1
CpuMultiplexors--;
d367 2
a368 1
globalCurrentEventsCounter -> add(1);
d374 1
a374 1
assert( cpu > 0 && cpu < CpuMultiplexors );
d379 2
a416 1
GlobalCurrentEventsCounter.add(-1);
d424 1
a424 1
while ( threadToExecute == 0 && GlobalCurrentEventsCounter.add(0) > 0 ) {
d457 20
a476 2
GlobalCurrentEventsCounter.add(-1);
CurrentEventsCounter[ask]--;
a480 14
if ( threadToExecute == 0 ) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name();
cerr << "Did not find anything, may look again";
cerr << " # of global events = ";
cerr << GlobalCurrentEventsCounter.add(0);
cerr << "\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
}
d488 1
a488 1
cerr << " nothing\n";
d495 4
d530 1
d532 1
a535 5
//
// Ok, we have decided that there is nothing for us to do,
// and there is nothing for anyone else to do either (maybe).
// So we need to decide if we are going to advance time or not.
//
a536 9
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name();
cerr << "Did not find anything, maybe rendezvous\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
d538 1
a538 1
oldGeneration = GivingUpGeneration;
d540 6
a545 15
//
// If I am here, I have nothing to do. If everyone
// else is here, they have nothing to do. If I am
// the last one here, no one has anything to do.
// Ergo, time to move on.
//
if ( GivingUpCounter == CpuMultiplexors ) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name();
cerr << "advance GivingUp generation count\n";
CpuCerrLock.release();
}
#endif /* NDEBUG */
d547 1
a547 1
GivingUpGeneration++;
d550 9
d562 2
d565 7
a571 27
}
//
// Now, wait for either a new generation or a new event
//
int giveUp = 0;
for (;;) {
if ( GlobalCurrentEventsCounter.value() > 0 ) {
//
// Race condition. Not certain why this occurs, but
// it does (measured by inserting a print).
//
GivingUpLock.reserve();
if ( oldGeneration != GivingUpGeneration ) {
// CpuCerrLock.reserve();
// cerr << name() << "would have crapped out here\n";
// CpuCerrLock.release();
GivingUpLock.release();
}
else {
GivingUpCounter--;
GivingUpLock.release();
break;
}
d573 3
a575 5
if (StopCpuMux || oldGeneration != GivingUpGeneration) {
giveUp = 1;
break;
}
d577 6
a582 14
int val = GivingUpCounter;
int gen = GivingUpGeneration;
int exitSoon = ( val == 0 && oldGeneration < gen);
int waitForMore = ( val != 0 && oldGeneration == gen);
if ( ! (exitSoon || waitForMore) ) {
//
// Lock to make certain this is an error
//
GivingUpLock.reserve();
val = GivingUpCounter;
gen = GivingUpGeneration;
exitSoon = ( val == 0 && oldGeneration < gen);
waitForMore = ( val != 0 && oldGeneration == gen);
d584 1
d586 13
a598 19
if ( ! (exitSoon || waitForMore) ) {
CpuCerrLock.reserve();
cerr << name() << " exitSoon = " << exitSoon <<"\n";
cerr << name() << " waitForMore = " << waitForMore <<"\n";
cerr << name() << " GivingUpCounter.value() = ";
cerr << val << "\n";
cerr << name() << " GlobalCurrentEventsCounter.value() = ";
cerr << GlobalCurrentEventsCounter.value() << "\n";
cerr << name() << " oldGeneration = ";
cerr << oldGeneration << "\n";
cerr << name() << " GivngUpGeneration = ";
cerr << gen << "\n";
cerr.flush();
CpuCerrLock.release();
assert(exitSoon);
assert(waitForMore);
}
a599 8
}
if (giveUp) {
#ifndef NDEBUG
if (CpuMuxDebugFlag) {
CpuCerrLock.reserve();
cerr << name() << " giving up\n";
CpuCerrLock.release();
a600 2
#endif /* NDEBUG */
return;
@
1.1
log
@Initial revision
@
text
@d47 3
a49 1
static SpinFetchAndOp GivingUpCounter(0);
d150 2
a151 1
GivingUpCounter.set(0);
d153 1
d381 1
a381 1
cerr << " cnt = ";
d451 4
a454 2
int oldGeneration = GivingUpGeneration;
int givingUpCounterWas = GivingUpCounter.add(1);
d461 1
a461 1
if (givingUpCounterWas == CpuMultiplexors-1) {
d472 2
a473 1
GivingUpCounter.set(0);
d476 3
d488 17
a504 2
GivingUpCounter.add(-1);
break;
d510 38
@